/*
 * vmx_vcpu_itc_d: emulate the guest's itc.d instruction — insert a data
 * translation (pte, itir, ifa) into the virtual TLB of a VMX domain.
 * NOTE(review): patch fragment — '+'-prefixed lines are additions. The
 * VTLB_DEBUG overlap-check body (between the local declarations and the
 * early IA64_FAULT return) and the function's closing brace are elided
 * from this view.
 */
IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
{
+ u64 gpfn;
#ifdef VTLB_DEBUG
int slot;
u64 ps, va;
/* (elided) on a VTR overlap conflict the insertion fails early: */
return IA64_FAULT;
}
#endif //VTLB_DEBUG
/* Guest page frame number, extracted from the pte's ppn field. */
+ gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
+ if (VMX_DOMAIN(vcpu)) {
/* Tag IO-space mappings so the vTLB can route them to the device model. */
+ if (__gpfn_is_io(vcpu->domain, gpfn))
+ pte |= VTLB_PTE_IO;
+ else
+ /* Ensure WB attribute if pte is related to a normal mem page,
+ * which is required by vga acceleration since qemu maps shared
+ * vram buffer with WB.
+ */
+ pte &= ~_PAGE_MA_MASK;
+ }
/* Purge any overlapping vTLB entries, then insert the new translation. */
thash_purge_and_insert(vcpu, pte, itir, ifa);
return IA64_NO_FAULT;
/*
 * NOTE(review): mid-function patch fragment — the enclosing definition
 * (presumably a gpfn->mfn remap path in the p2m code — confirm against
 * the full file) is not visible here.
 */
// update pte
npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
/* Atomically install the new pte, returning the previous mapping. */
old_pte = ptep_xchg(mm, mpaddr, pte, npte);
/*
 * Was: any non-none old pte triggered refcount cleanup. Now only real
 * memory ptes do — pte_mem() additionally excludes IO/invalid-gpfn
 * encodings, which carry no page refcount to drop.
 */
- if (!pte_none(old_pte)) {
+ if (pte_mem(old_pte)) {
unsigned long old_mfn;
struct page_info* old_page;
/* pte predicate helpers: test individual status bits in a pte value. */
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
+#ifdef XEN
+/*
+ * pte_mem: true iff the pte refers to an ordinary memory page — i.e. it
+ * is present (not none) and its frame is neither an IO-space nor an
+ * invalid gpfn encoding (GPFN_IO_MASK / GPFN_INV_MASK bits clear).
+ */
+#define pte_mem(pte) \
+ (!(pte_val(pte) & (GPFN_IO_MASK | GPFN_INV_MASK)) && !pte_none(pte))
+#endif
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
/* NOTE(review): the comment above is unterminated — its closing text is
 * elided from this chunk. */
#define shadow_mode_translate(d) (1) /* translate mode is unconditionally on here */
/* NOTE(review): patch fragment — the two one-line comments below are
 * replaced by a single block comment covering both declarations. */
-// for granttab transfer. XENMEM_populate_physmap
+/*
+ * Utilities to change the gpfn->mfn relationship for a designated domain,
+ * as required by gnttab transfer, ballooning, the device model, etc.
+ */
void guest_physmap_add_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
-// for balloon driver. XENMEM_decrease_reservation
void guest_physmap_remove_page(struct domain *d, unsigned long gpfn, unsigned long mfn);
#endif